# Copyright (c) HySoP 2011-2024
#
# This file is part of HySoP software.
# See "https://particle_methods.gricad-pages.univ-grenoble-alpes.fr/hysop-doc/"
# for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Discrete fields (scalars or vectors) descriptions.
* :class:`~hysop.fields.cartesian_discrete_field.CartesianDiscreteScalarFieldViewContainerI`
* :class:`~hysop.fields.cartesian_discrete_field.CartesianDiscreteScalarField`
* :class:`~hysop.fields.cartesian_discrete_field.CartesianDiscreteTensorField`
* :class:`~hysop.fields.cartesian_discrete_field.CartesianDiscreteScalarFieldView`
Documentation and examples can be found in :ref:`fields`.
"""
import hashlib
import numpy as np
from hysop import vprint, dprint, MPI
from hysop.core.arrays.all import HostArray
from hysop.constants import (
Backend,
DirectionLabels,
GhostOperation,
GhostMask,
ExchangeMethod,
MemoryOrdering,
)
from hysop.topology.topology import Topology, TopologyView
from hysop.tools.decorators import debug, static_vars
from hysop.tools.htypes import check_instance, to_tuple, first_not_None, to_list
from hysop.tools.numpywrappers import npw
from hysop.tools.misc import prod
from hysop.tools.units import bytes2str
from hysop.fields.discrete_field import (
DiscreteScalarField,
DiscreteScalarFieldView,
DiscreteScalarFieldViewContainerI,
DiscreteTensorField,
)
from hysop.topology.cartesian_topology import (
CartesianTopology,
CartesianTopologyState,
CartesianTopologyView,
)
from hysop.testsenv import __HAS_OPENCL_BACKEND__
if __HAS_OPENCL_BACKEND__:
from hysop.core.arrays.all import OpenClArray
from hysop.backend.device.opencl import clArray
from hysop.core.mpi.topo_tools import TopoTools
class CartesianDiscreteScalarFieldViewContainerI:
def initialize(
self,
formula,
vectorize=False,
without_ghosts=True,
exchange_ghosts=True,
exchange_kwds=None,
only_finite=True,
reorder=None,
quiet=False,
components=None,
**kwds,
):
"""
Initialize the cartesian field data.
Parameters
----------
formula : python function or tuple of np.ndarray, optional
User-defined function of the space coordinates (at least)
            used to compute field values for each grid point.
Initialize directly from data if a tuple of np.ndarray is supplied.
vectorize : bool, optional
True if formula must be vectorized
(i.e. is of type 'user_func_2', see :ref:`fields` for details)
        without_ghosts: bool, optional, defaults to True
Do not initialize ghosts using formula.
In this case, ghosts may be initialized by ghost exchange (see next parameter).
exchange_ghosts: bool, optional, defaults to True
            Should ghosts be exchanged after initialization?
exchange_kwds: dict, optional,
Extra exchange keyword arguments passed to ghost exchange.
Only used if exchange_ghosts is set to True.
only_finite: bool, optional
Check that initialized values are not +inf, -inf or NaN.
Defaults to True.
reorder: str or tuple of str, optional
            Name(s) of keyword arguments to reorder according to the current
            topology state. Each such kwd must be contained in kwds, and
            kwds[kwd] must be of length self.domain.dim.
kwds : dict
Extra keyword arguments passed to formula, optional, depends on passed formula.
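
        Examples
        --------
        A minimal sketch, assuming ``dfield`` is a discretized scalar field
        obtained from ``field.discretize(topology)``; the formula name and
        its body are illustrative only::

            def init_sin(data, coords, component):
                # data: backend buffer of the component being initialized
                # coords: tuple of local mesh coordinate arrays
                data[...] = np.sin(coords[0])

            dfield.initialize(formula=init_sin)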
"""
if not self.has_unique_backend():
msg = "Cannot initialize a tensor field defined on multiple backends at a time."
raise RuntimeError(msg)
backend = self.backend
check_instance(components, tuple, values=(int, tuple), allow_none=True)
components = self.ids_to_components(
first_not_None(components, tuple(range(self.nb_components)))
)
check_instance(
components, tuple, values=int, minval=0, maxval=self.nb_components - 1
)
assert len(set(components)) == len(
components
), f"Repeated components: {components}"
def filter_components(x, components=components):
assert isinstance(x, tuple)
assert len(x) == self.nb_components
return tuple(
x[i] if (i in components) else None for i in range(self.nb_components)
)
reorder = to_tuple(first_not_None(reorder, ()))
check_instance(reorder, tuple, values=str)
for kwd in reorder:
if kwd not in kwds:
msg = f"{kwd} is not contained in passed keyword variables."
raise ValueError(msg)
vals = kwds[kwd]
if isinstance(vals, npw.ndarray):
if vals.ndim != self.dim:
msg = "Value contained at keyword {} index {} has incompatible dimension "
msg += "with respect to domain.dim={}, got val.ndim={}, cannot reorder."
msg = msg.format(kwd, self.dim, val.ndim)
raise ValueError(msg)
elif isinstance(vals[0], (tuple, list, npw.ndarray)):
for i, val in enumerate(vals):
if len(val) != self.dim:
msg = "Value contained at keyword {} index {} is not of length "
msg += "domain.dim={}, got {}, cannot reorder."
msg = msg.format(kwd, i, self.dim, val)
raise ValueError(msg)
else:
if len(kwds[kwd]) != self.dim:
                    msg = "Value contained at keyword {} is not of length domain.dim={}, "
                    msg += "got {}, cannot reorder."
                    msg = msg.format(kwd, self.dim, vals)
raise ValueError(msg)
(from_raw_data, from_formula) = (False, False)
if isinstance(formula, (np.ndarray, tuple)):
formula = to_tuple(formula)
msg = "\n>Initializing discrete field {} from raw array data."
msg = msg.format(self.name)
if not quiet:
vprint(msg)
from_raw_data = True
else:
msg = "\n>Initializing discrete field {} with formula {}."
msg = msg.format(self.pretty_name, formula.__name__)
if not quiet:
vprint(msg)
from_formula = True
# buffers = backend dependent buffers of the current field
# data = host buffers corresponding to backend buffer shapes and dtype
dfields = ()
buffers = ()
coords = ()
for i in range(self.nb_components):
if i not in components:
dfields += (None,)
buffers += (None,)
coords += (None,)
else:
# dfield may be read-only because of topology state, rediscretize with write access
dfield = self.dfields[i]
topo_view = dfield.topology
topo = topo_view.topology
wstate = topo_view.topology_state.copy(
memory_order=MemoryOrdering.C_CONTIGUOUS, is_read_only=False
                )  # get a writable state
dfield = dfield.field.discretize(topo, wstate)
dfields += (dfield,)
if without_ghosts:
buffers += (dfield.compute_data[0],)
coords += (dfield.compute_mesh_coords,)
else:
buffers += (dfield.sdata,)
coords += (dfield.mesh_coords,)
if backend.kind == Backend.HOST:
data = buffers
else:
# we are not on host, allocate temporary buffers
msg = " *Allocating temporary buffers on host."
if not quiet:
vprint(msg)
host_backend = self.backend.host_array_backend
data = tuple(
(
host_backend.empty(shape=d.shape, dtype=d.dtype)
if (d is not None)
else None
)
for d in buffers
)
if from_formula:
# initialize from a python method
assert "data" not in kwds, "data is a reserved keyword."
assert "coords" not in kwds, "coords is a reserved keyword."
assert "component" not in kwds, "component is a reserved keyword."
# vectorize formula if requested
            if vectorize and not isinstance(formula, np.vectorize):
formula = np.vectorize(formula)
# call formula for each component
for i in components:
topology_state = dfields[i].topology_state
formula_kwds = dict(data=data[i], coords=coords[i], component=i)
formula_kwds.update(kwds)
for kwd in reorder:
vals = kwds[kwd]
if isinstance(
vals[0], (tuple, list, np.ndarray)
) and not isinstance(vals, np.ndarray):
vals = to_list(vals)
                            for j, val in enumerate(vals):
                                vals[j] = topology_state.transposed(val)
formula_kwds[kwd] = vals
else:
formula_kwds[kwd] = topology_state.transposed(vals)
# call formula
formula(**formula_kwds)
elif from_raw_data:
# initialize from raw data
assert len(formula) == len(data)
            all_src_slices = kwds.pop("src_slices", (Ellipsis,) * len(formula))
            # pop dst_slices once, outside the per-component loop, so that every
            # component sees the same destination view
            dst_slices = kwds.pop("dst_slices", Ellipsis)
for i, (dsrc, ddst) in enumerate(zip(formula, data)):
if ddst is None:
continue
src_slices = all_src_slices[i]
check_instance(dsrc, (np.ndarray, HostArray))
check_instance(ddst, (np.ndarray, HostArray))
src_shape = dsrc[src_slices].shape
dst_shape = ddst[dst_slices].shape
if src_shape != dst_shape:
msg = "Cannot initialize field from raw data because the shapes do not match."
msg += "\n"
msg += "\n Destination field:"
msg += f"\n base_shape: {ddst.shape}"
msg += f"\n base_dtype: {ddst.dtype}"
msg += f"\n dst_slices: {dst_slices}"
msg += f"\n dst_shape: {dst_shape}"
msg += "\n"
msg += "\n Raw source data:"
msg += f"\n base_shape: {dsrc.shape}"
msg += f"\n base_dtype: {dsrc.dtype}"
msg += f"\n src_slices: {src_slices}"
msg += f"\n src_shape: {src_shape}"
msg += "\n"
raise RuntimeError(msg)
ddst[dst_slices] = dsrc[src_slices]
else:
msg = "Unknown initialization kind."
raise RuntimeError(msg)
assert len(data) == len(buffers) == self.nb_components
assert all(
((d0 is None) and (d1 is None)) or (d0.size == d1.size)
for (d0, d1) in zip(buffers, data)
), "Array size was altered."
assert all(
((d0 is None) and (d1 is None)) or (d0.dtype == d1.dtype)
for (d0, d1) in zip(buffers, data)
), "Array dtype was altered."
assert all(
((d0 is None) and (d1 is None)) or (d0.shape == d1.shape)
for (d0, d1) in zip(buffers, data)
), "Array shape was altered."
if only_finite:
for i, d in enumerate(data):
if d is None:
continue
if np.isnan(d).any():
msg = "Initialization of {} on component {} failed, got NaNs."
msg = msg.format(self.pretty_name, i)
print(d)
raise RuntimeError(msg)
if not np.isfinite(d).all():
msg = (
"Initialization of {} on component {} failed, "
+ "got values that are not finite."
)
msg = msg.format(self.pretty_name, i)
raise RuntimeError(msg)
if backend.kind != Backend.HOST:
msg = " *Copying temporary buffers to backend {}."
msg = msg.format(backend.kind)
if not quiet:
vprint(msg)
for b, d in zip(buffers, data):
if d is not None:
b[...] = d
if exchange_ghosts:
msg = " *Exchanging ghosts on backend {}."
msg = msg.format(backend.kind)
if not quiet:
vprint(msg)
exchange_kwds = first_not_None(exchange_kwds, {})
exchange_kwds["components"] = components
evt = self.exchange_ghosts(**exchange_kwds)
if evt is not None:
evt.wait()
def norm(self, p=2, data=None, view=None):
"""
Compute a per-component Lp norm of the discrete field.
        Lp-norm = sum(|data|**p)**(1.0/p),
        summed over all grid points, excluding ghosts.
        Parameters
----------
p: int or float, optional
Parameter for the Lp-norm.
p can be any integer (or float) greater than 0.
Infinite norm can be returned by setting p to np.inf.
Defaults to the Euclidean norm (p=2).
If p!=np.inf, the result is scaled by the mesh size.
data: tuple of Array, optional
Custom data to apply the norm to.
By default this is the local to field data.
view:
View on data.
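
        Examples
        --------
        A minimal sketch, assuming ``dfield`` is an allocated discrete
        field (illustrative only)::

            l2 = dfield.norm(p=2)        # per-component L2 norm
            linf = dfield.norm(p=np.inf) # per-component infinity norm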
"""
if data is not None:
data = to_tuple(data)
view = first_not_None(view, self.compute_slices)
else:
data = self.data
view = first_not_None(view, Ellipsis)
check_instance(p, (int, float))
check_instance(data, tuple)
nb_components = len(data)
result = npw.zeros((nb_components,))
gresult = npw.zeros((nb_components,))
if p == np.inf:
for i, d in enumerate(data):
result[i] = abs(d[view]).max().get()
self.topology.cart_comm.Allreduce(result, gresult, op=MPI.MAX)
norm = gresult
else:
for i, d in enumerate(data):
tmp = abs(d[view]) ** p
result[i] = tmp.sum().get() * npw.prod(self.space_step)
self.topology.cart_comm.Allreduce(result, gresult, op=MPI.SUM)
norm = gresult ** (1.0 / p)
return norm
def distance(self, other, **kwds):
"""
        Compute the distance between two discrete fields:
        p-distance = sum(|data[d]|**p)**(1.0/p) for d = 1..dim,
        where data[d] = |self.data[d] - other.data[d]|,
        summed over all grid points, excluding ghosts.
        See CartesianDiscreteScalarField.norm() for keyword parameters.
"""
assert "data" not in kwds
assert self.nb_components == other.nb_components
sview = self.compute_slices
oview = other.compute_slices
data = ()
for lhs, rhs in zip(self.data, other.data):
data += (lhs[sview].get() - rhs[oview].get(),)
return self.norm(data=data, **kwds)
def collect_data(self, component=0, leader=0):
"""
        Debug function to collect data across multiple processes.
"""
data = self.data[component][self.compute_slices].get().handle
slcs = self.mesh.global_compute_slices
comm = self.topology.cart_comm
all_data = comm.gather(data, root=leader)
all_slcs = comm.gather(slcs, root=leader)
if comm.rank == leader:
array = npw.full(
dtype=data.dtype, shape=self.mesh.grid_resolution, fill_value=npw.nan
)
for d, view in zip(all_data, all_slcs):
array[view] = d
return array
else:
return None
def print_with_ghosts(
self,
component=0,
compute=None,
data=None,
inner_ghosts=None,
outer_ghosts="X",
**print_opts,
):
"""
        Print field values with ghost values replaced by a symbol.
Mainly for debug purposes.
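
        A minimal sketch, assuming a small allocated field ``dfield``
        (the replacement symbols are arbitrary)::

            dfield.print_with_ghosts(outer_ghosts="X", inner_ghosts="o")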
"""
data = first_not_None(data, self.data[component].get())
strarr = np.empty_like(data, dtype=object)
strarr[...] = data
if compute is not None:
if callable(compute):
compute = np.vectorize(compute)
strarr[self.compute_slices] = compute(strarr[self.compute_slices])
else:
strarr[self.compute_slices] = compute
if inner_ghosts is not None:
for lg, rg, _ in self.inner_ghost_slices:
if callable(inner_ghosts):
inner_ghosts = np.vectorize(inner_ghosts)
strarr[lg] = inner_ghosts(strarr[lg])
strarr[rg] = inner_ghosts(strarr[rg])
else:
strarr[lg] = inner_ghosts
strarr[rg] = inner_ghosts
if outer_ghosts is not None:
for ndir in self.all_outer_ghost_slices:
for directions in self.all_outer_ghost_slices[ndir]:
for disp, (slc, _) in self.all_outer_ghost_slices[ndir][
directions
].items():
if (sum(d != 0 for d in disp) == ndir) and ndir:
if callable(outer_ghosts):
outer_ghosts = np.vectorize(outer_ghosts)
strarr[slc] = outer_ghosts(strarr[slc])
else:
strarr[slc] = outer_ghosts
_formatter = {object: lambda x: f"{x:^6}"[:6], float: lambda x: f"{x:+6.2f}"}
_print_opts = dict(
threshold=10000,
linewidth=1000,
nanstr="nan",
infstr="inf",
formatter={
"object": lambda x: _formatter.get(type(x), _formatter[object])(x)
},
)
_print_opts.update(print_opts)
from hysop.tools.contexts import printoptions
with printoptions(**_print_opts):
print(strarr)
@property
def compute_data(self):
"""Like data, but with a view on compute_slices."""
return tuple(df.sdata[df.compute_slices] for df in self.discrete_field_views())
@property
def compute_buffers(self):
"""Like buffers, but with a view on compute_slices."""
return tuple(
df.sbuffer[df.compute_slices] for df in self.discrete_field_views()
)
def local_slices(self, ghosts):
"""
Return a tuple of slices indexing the local compute mesh, including specified
number of ghosts on each axis.
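
        A minimal sketch, assuming a 2D scalar field with at least one
        ghost point per axis (illustrative only)::

            slc = dfield.local_slices(ghosts=(1, 1))
            view = dfield.sdata[slc]  # compute domain plus one ghost layer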
"""
if not self.has_unique_compute_slices():
msg = "Field container does not have unique compute slices."
raise RuntimeError(msg)
cslc = self.compute_slices
assert len(ghosts) == len(cslc)
gslc = ()
for slc, g in zip(cslc, ghosts):
(start, stop, step) = slc.start, slc.stop, slc.step
assert step in (1, None)
slc = slice(start - g, stop + g)
gslc += (slc,)
return gslc
def has_unique_compute_resolution(self):
return self.has_unique_attribute("compute_resolution")
def has_unique_resolution(self):
return self.has_unique_attribute("resolution")
def has_unique_ghosts(self):
return self.has_unique_attribute("ghosts")
def has_unique_space_step(self):
return self.has_unique_attribute("space_step")
def has_unique_coords(self):
return self.has_unique_attribute("coords")
def has_unique_mesh_coords(self):
return self.has_unique_attribute("mesh_coords")
def has_unique_compute_slices(self):
return self.has_unique_attribute("compute_slices")
def has_unique_inner_ghost_slices(self):
return self.has_unique_attribute("inner_ghost_slices")
def has_unique_outer_ghost_slices(self):
return self.has_unique_attribute("outer_ghost_slices")
def has_unique_grid_npoints(self):
return self.has_unique_attribute("grid_npoints")
def has_unique_axes(self):
return self.has_unique_attribute("axes")
def has_unique_tstate(self):
return self.has_unique_attribute("tstate")
def has_unique_memory_order(self):
return self.has_unique_attribute("memory_order")
def has_unique_local_boundaries(self):
return self.has_unique_attribute("local_boundaries")
def has_unique_local_lboundaries(self):
return self.has_unique_attribute("local_lboundaries")
def has_unique_local_rboundaries(self):
return self.has_unique_attribute("local_rboundaries")
def has_unique_global_boundaries(self):
return self.has_unique_attribute("global_boundaries")
def has_unique_global_lboundaries(self):
return self.has_unique_attribute("global_lboundaries")
def has_unique_global_rboundaries(self):
return self.has_unique_attribute("global_rboundaries")
def has_unique_is_at_boundary(self):
return self.has_unique_attribute("is_at_boundary")
def has_unique_is_at_left_boundary(self):
return self.has_unique_attribute("is_at_left_boundary")
def has_unique_is_at_right_boundary(self):
return self.has_unique_attribute("is_at_right_boundary")
def has_unique_periodicity(self):
return self.has_unique_attribute("periodicity")
@property
def compute_resolution(self):
return self.get_unique_attribute("compute_resolution")
@property
def resolution(self):
return self.get_unique_attribute("resolution")
@property
def ghosts(self):
return self.get_unique_attribute("ghosts")
@property
def space_step(self):
return self.get_unique_attribute("space_step")
@property
def coords(self):
return self.get_unique_attribute("coords")
@property
def mesh_coords(self):
return self.get_unique_attribute("mesh_coords")
@property
def compute_coords(self):
return self.get_unique_attribute("compute_coords")
@property
def compute_mesh_coords(self):
return self.get_unique_attribute("compute_mesh_coords")
@property
def compute_slices(self):
return self.get_unique_attribute("compute_slices")
@property
def inner_ghost_slices(self):
return self.get_unique_attribute("inner_ghost_slices")
@property
def outer_ghost_slices(self):
return self.get_unique_attribute("outer_ghost_slices")
@property
def all_inner_ghost_slices(self):
return self.get_unique_attribute("all_inner_ghost_slices")
@property
def all_outer_ghost_slices(self):
return self.get_unique_attribute("all_outer_ghost_slices")
@property
def grid_npoints(self):
return self.get_unique_attribute("grid_npoints")
@property
def axes(self):
return self.get_unique_attribute("axes")
@property
def tstate(self):
return self.get_unique_attribute("tstate")
@property
def memory_order(self):
return self.get_unique_attribute("memory_order")
@property
def local_boundaries(self):
return self.get_unique_attribute("local_boundaries")
@property
def local_lboundaries(self):
return self.get_unique_attribute("local_lboundaries")
@property
def local_rboundaries(self):
return self.get_unique_attribute("local_rboundaries")
@property
def global_boundaries(self):
return self.get_unique_attribute("global_boundaries")
@property
def global_lboundaries(self):
return self.get_unique_attribute("global_lboundaries")
@property
def global_rboundaries(self):
return self.get_unique_attribute("global_rboundaries")
@property
def is_at_boundary(self):
return self.get_unique_attribute("is_at_boundary")
@property
def is_at_left_boundary(self):
return self.get_unique_attribute("is_at_left_boundary")
@property
def is_at_right_boundary(self):
return self.get_unique_attribute("is_at_right_boundary")
@property
def periodicity(self):
return self.get_unique_attribute("periodicity")
class CartesianDiscreteScalarFieldView(
CartesianDiscreteScalarFieldViewContainerI, DiscreteScalarFieldView
):
"""
View over a CartesianDiscreteScalarField.
"""
__slots__ = ("_dfield", "_topology_state", "_topology_view", "_data_view")
@debug
def __new__(cls, dfield, topology_state, **kwds):
"""
Initialize a CartesianDiscreteScalarFieldView on given discrete cartesian
field with given cartesian topology state.
Parameters
----------
dfield: :class:`hysop.fields.cartesian_discrete_field.CartesianDiscreteScalarField`
The discrete field this view is on.
        topology_state: :class:`hysop.topology.cartesian_topology.CartesianTopologyState`
The topology state of this view (optional).
kwds: dict
Base class arguments.
"""
check_instance(
dfield,
CartesianDiscreteScalarField,
allow_none=issubclass(cls, CartesianDiscreteScalarField),
)
check_instance(topology_state, CartesianTopologyState)
obj = super().__new__(cls, dfield=dfield, topology_state=topology_state, **kwds)
obj._data_view = None
return obj
@debug
def __init__(self, dfield, topology_state, **kwds):
super().__init__(dfield=dfield, topology_state=topology_state, **kwds)
def _compute_data_view(self, data=None):
"""
Compute transposed views of underlying discrete field data
according to topology state.
This is called after the discrete field has allocated data.
Arrays are reshaped and set read-only if necessary.
This can also be called from an hysop.backend.host.host_operator.OpenClMappable object
to map an opencl generated pointer to host (in this case custom data is passed
and self_data == False).
"""
self_data = data is None
data = first_not_None(data, self._dfield._data)
if data is None:
if self_data:
msg = "{}::{} internal data has not been set yet."
else:
msg = "{}::{} cannot compute data view from external None data."
msg = msg.format(type(self._dfield).__name__, self._dfield.name)
raise RuntimeError(msg)
if self.memory_order is MemoryOrdering.C_CONTIGUOUS:
dataview = data.reshape(self.resolution)
assert dataview.flags.c_contiguous
elif self.memory_order is MemoryOrdering.F_CONTIGUOUS:
dataview = data.reshape(self.resolution[::-1])
dataview = dataview.T
assert dataview.flags.f_contiguous
else:
msg = f"Unknown memory order {self.memory_order}."
raise NotImplementedError(msg)
assert all(dataview.shape == self.resolution)
if self.is_read_only:
if isinstance(dataview, np.ndarray):
npw.set_readonly(dataview)
if self_data:
self._data_view = dataview
else:
return dataview
def __get_data_view(self):
if self._data_view is None:
self._compute_data_view()
return self._data_view
def __prepare_data(self, data):
"""Prepare input data for copy or swap."""
if isinstance(data, tuple):
assert len(data) == 1
data = data[0]
backend = self.backend
if backend.can_wrap(data):
data = backend.wrap(data)
if data.ndim != self.dim:
msg = (
"Array dimension {} is not compatible with discrete field dimension {}."
)
msg = msg.format(data.ndim, self.dim)
raise ValueError(msg)
elif data.size != self.npoints:
msg = "Array size {} is not compatible with discrete field size {}."
msg = msg.format(data.size, self.size)
raise ValueError(msg)
elif data.dtype != self.dtype:
msg = "dtype {} is not compatible with discrete field dtype {}."
msg = msg.format(data.dtype, self.dtype)
raise ValueError(msg)
elif any(data.shape != self.resolution):
msg = "shape {} is not compatible with discrete field resolution {}."
msg = msg.format(data.shape, self.resolution)
raise ValueError(msg)
return data
def _get_data(self):
"""
Return contained array as a tuple.
"""
return (self.__get_data_view(),)
def _set_data(self, copy_data):
"""Performs a copy from copy_data to self.data"""
msg = "Discrete field {}::{}::set_data()."
msg = msg.format(self.backend.kind, self.name)
dprint(msg)
msg = f"CartesianDiscreteScalarField {self.name} is read-only."
assert not self.is_read_only, msg
src = self.__prepare_data(data=copy_data)
self.sdata.copy_from(src)
def _get_buffers(self):
"""
        Return all array data as buffers, as a tuple.
"""
d = self.__get_data_view()
if self.backend.kind == Backend.HOST:
return (d.handle.view(type=np.ndarray),)
else:
return (d.handle,)
def _get_sdata(self):
"""
Return contained array.
"""
assert self.is_scalar
return self._get_data()[0]
def _set_sdata(self, copy_data):
"""Performs a copy from copy_data to self.data"""
self._set_data(copy_data)
def _get_sbuffer(self):
"""
Return container buffer.
"""
assert self.is_scalar
return self._get_buffers()[0]
def __call__(self, key):
msg = "Getting buffers by using __getitem__ and __call__ has been deprecated, "
msg += "because this would clash with the DiscreteTensorField interface."
msg += "\nAll fields are now scalar (one component only), "
msg += "use the 'buffers' attribute instead."
raise RuntimeError(msg)
def local_slices(self, ghosts):
"""
Return a tuple of slices indexing the local compute mesh, including specified
number of ghosts on each axis.
"""
assert len(ghosts) == self.domain.dim
cslc = self.compute_slices
gslc = ()
for slc, g in zip(cslc, ghosts):
(start, stop, step) = slc.start, slc.stop, slc.step
assert step in (1, None)
slc = slice(start - g, stop + g)
gslc += (slc,)
return gslc
def _get_axes(self):
"""Return the permutation scheme in numpy notations."""
return self._topology_state.axes
def _get_tstate(self):
"""Return the permutation scheme as a hysop.constants.TranspositionState."""
return self._topology_state.tstate
def _get_memory_order(self):
"""Return the memory order of underlying data."""
return self._topology_state.memory_order
def _get_size(self):
"""Size of the underlying contiguous data arrays."""
msg = "size has been deprecated for ScalarDiscreteFields, use npoints instead."
raise AttributeError(msg)
def _get_shape(self):
"""Alias for resolution."""
msg = "shape has been deprecated for ScalarDiscreteFields, use resolution instead."
raise AttributeError(msg)
def _get_compute_resolution(self):
"""Get compute resolution (mesh size without ghosts) on local discrete field mesh."""
return self.mesh.compute_resolution
def _get_resolution(self):
"""Get resolution (mesh size with ghosts) on local discrete field mesh."""
return self.mesh.local_resolution
def _get_npoints(self):
"""Get resolution (mesh size with ghosts) on local discrete field mesh."""
return self.mesh.local_npoints
def _get_ghosts(self):
"""Get the number of ghosts per direction on local discrete field mesh."""
return self.mesh.ghosts
def _get_compute_slices(self):
"""Return a tuple of slices indexing the local compute mesh."""
return self.mesh.local_compute_slices
def _get_grid_npoints(self):
"""Return the effective number of global computational points."""
return self.mesh.grid_npoints
def _get_space_step(self):
"""Get the space step of the discretized mesh grid."""
return self.mesh.space_step
def _get_coords(self):
"""Get local mesh physical coordinates in each direction (including ghosts)."""
return self.mesh.local_coords
def _get_mesh_coords(self):
"""
Get local mesh physical coordinates in each direction (including ghosts).
Same as self.coords but with all sequences as nd arrays and reversed.
        Returned coordinates are (x,y,z,...) instead of (...,z,y,x).
"""
return self.mesh.local_mesh_coords
def _get_compute_coords(self):
"""Get local mesh physical coordinates in each direction (excluding ghosts)."""
return self.mesh.local_compute_coords
def _get_compute_mesh_coords(self):
"""
Get local mesh physical coordinates in each direction (excluding ghosts).
Same as self.compute_coords but with all sequences as nd arrays and reversed.
        Returned coordinates are (x,y,z,...) instead of (...,z,y,x).
"""
return self.mesh.local_compute_mesh_coords
def _get_is_tmp(self):
"""Is this DiscreteScalarField temporary ?"""
return self._dfield.is_tmp
def _get_mem_tag(self):
return self._dfield.mem_tag
def _get_global_lboundaries(self):
"""Return global left boundaries."""
return self.mesh.global_lboundaries
def _get_global_rboundaries(self):
"""Return global right boundaries."""
return self.mesh.global_rboundaries
def _get_global_boundaries(self):
"""Return global boundaries as a tuple of left and right boundaries."""
return self.mesh.global_boundaries
def _get_local_lboundaries(self):
"""
Return local left boundaries kind.
Boundaries on the interior of the global domain have value BoundaryCondition.NONE.
"""
return self.mesh.local_lboundaries
def _get_local_rboundaries(self):
"""
Return local right boundaries kind.
Boundaries on the interior of the global domain have value BoundaryCondition.NONE.
"""
return self.mesh.local_rboundaries
def _get_local_boundaries(self):
"""
Return local boundaries kind as a tuple of left and right boundaries.
Boundaries on the interior of the global domain have value BoundaryCondition.NONE.
"""
return self.mesh.local_boundaries
def _get_global_boundaries_config(self):
"""
Return global boundaries configuration (boundary kind + attached data).
"""
        return (self.global_lboundaries_config, self.global_rboundaries_config)
def _get_global_lboundaries_config(self):
"""
Return global left boundaries configuration (boundary kind + attached data).
"""
return self.topology_state.transposed(self.field.lboundaries)
def _get_global_rboundaries_config(self):
"""
Return global right boundaries configuration (boundary kind + attached data).
"""
return self.topology_state.transposed(self.field.rboundaries)
def _get_periodicity(self):
"""
Get periodicity of the global boundaries.
This is not to be confused with the cartesian communicator periodicity.
"""
return self.mesh.periodicity
def _get_is_at_left_boundary(self):
"""
Return a numpy boolean mask to identify processes that are on the left of the domain.
        ie. is_at_left_boundary[d] = True means that the process is the first
        along direction d: topology.proc_coords[d] == 0.
"""
return self.mesh.is_at_left_boundary
def _get_is_at_right_boundary(self):
"""
Return a numpy boolean mask to identify processes that are on the right of the domain.
        ie. is_at_right_boundary[d] = True means that the process is the last
        along direction d: topology.proc_coords[d] == topology.proc_shape[d] - 1.
"""
return self.mesh.is_at_right_boundary
def _get_is_at_boundary(self):
"""
        Return a numpy boolean mask to identify processes that are either on the left
        or on the right of the domain. A process can be both on the left and on the
        right along direction d if and only if topology.proc_shape[d] == 1.
        ie. is_at_boundary[d] = True means that the process is the first or
        the last along direction d: proc_coords[d] in (0, proc_shape[d] - 1).
"""
return self.mesh.is_at_boundary
def get_outer_ghost_slices(self, *args, **kwds):
"""
Return a tuple of tuples of slices indexing the local outer ghosts.
        Those slices correspond to local-to-process ghosts,
EXCLUDING diagonal neighbour processes ghost overlaps.
See CartesianMesh.get_local_outer_ghost_slices().
"""
return self.mesh.get_local_outer_ghost_slices(*args, **kwds)
def get_inner_ghost_slices(self, *args, **kwds):
"""
Return a tuple of tuples of slices indexing the local inner ghosts.
        Those slices correspond to neighbour process overlaps on local
compute slices, EXCLUDING diagonal neighbour processes ghost
overlaps.
See CartesianMesh.get_local_inner_ghost_slices().
"""
return self.mesh.get_local_inner_ghost_slices(*args, **kwds)
def get_all_outer_ghost_slices(self, *args, **kwds):
"""
Return collection of slices and shapes describing all possible combinations
of outer ghosts slices in this array as local indices.
        Those slices correspond to local-to-process ghosts (ie. ghosts that may
be received from other neighbor processes, including diagonal processes,
during a ghosts exchange).
        Return a dictionary {ndirections} (number of directions intersected)
-> {directions} (0=first axis, ..., dim-1=last axis)
-> {displacements} (-1=LEFT, 0=CENTER, +1=RIGHT)
-> (view, shape)
"""
return self.mesh.get_all_local_outer_ghost_slices(*args, **kwds)
def get_all_inner_ghost_slices(self, *args, **kwds):
"""
Return collection of slices and shapes describing all possible
combinations of inner ghosts slices in this array as local indices.
        Those slices correspond to local-to-process ghosts (ie. ghosts that may
        be sent to other neighbor processes, including diagonal processes,
        during a ghosts exchange).
        Return a dictionary {ndirections} (number of directions intersected)
-> {directions} (0=first axis, ..., dim-1=last axis)
-> {displacements} (-1=LEFT, 0=CENTER, +1=RIGHT)
-> (view, shape)
"""
return self.mesh.get_all_local_inner_ghost_slices(*args, **kwds)
def get_all_outer_ghost_slices_per_ncenters(self, *args, **kwds):
"""
Compute the collection of slices describing all possible combinations
        of outer ghost slices in this array as local indices like
self.get_all_local_outer_ghost_slices() and sort them by
number of centers (number of displacement == 0).
Return a list [ndirections] (number of directions intersected)
[ncenters] (number of null displacements)
-> {directions} (0=first axis, ..., dim-1=last axis)
-> {displacements} (-1=LEFT, 0=CENTER, +1=RIGHT)
-> (view,shape)
'all' -> tuple of all views and shapes up to this depth
"""
return self.mesh.get_all_local_outer_ghost_slices_per_ncenters(*args, **kwds)
def get_all_inner_ghost_slices_per_ncenters(self, *args, **kwds):
"""
Compute the collection of slices describing all possible combinations
        of inner ghost slices in this array as local indices like
self.get_all_local_inner_ghost_slices() and sort them by
number of centers (number of displacement == 0).
Return a list [ndirections] (number of directions intersected)
[ncenters] (number of null displacements)
-> {directions} (0=first axis, ..., dim-1=last axis)
-> {displacements} (-1=LEFT, 0=CENTER, +1=RIGHT)
-> (view,shape)
'all' -> tuple of all views and shapes up to this depth
"""
return self.mesh.get_all_local_inner_ghost_slices_per_ncenters(*args, **kwds)
def short_description(self):
"""Short description of this discrete field."""
s = "{}[name={}, pname={}, dim={},"
s += "dtype={}, init_vals={}]"
s = s.format(
self.full_tag,
self.name,
self.pretty_name,
self.dim,
self.dtype,
self.initial_values,
)
return s
def long_description(self):
"""Long description of this discrete field."""
s = """\
{}
*name: {}
*pname: {}
*dim: {}
*resolution: {}
*dtype: {}
*initial values: {}
*domain: {}
*topology: {}
*topology_state: {}
""".format(
self.full_tag,
self.name,
self.pretty_name,
self.dim,
self.resolution,
self.dtype,
self.initial_values,
self.domain.short_description(),
self.topology.short_description(),
self.topology_state.short_description(),
)
return s
def clone(
self, name=None, pretty_name=None, var_name=None, latex_name=None, tstate=None
):
"""
Create a DiscreteScalarField and allocate it
like the current object, possibly on a different backend.
        This should only be used for debugging and testing purposes.
The generated discrete field is registered to a cloned continuous field.
"""
from hysop.tools.sympy_utils import subscript
default_name = f"{self.name}__{self._dfield._clone_id}"
default_pname = "{}__{}".format(
self.pretty_name, subscript(self._dfield._clone_id)
)
default_vname = f"{self.var_name}__{self._dfield._clone_id}"
default_lname = f"{self.latex_name}__{self._dfield._clone_id}"
self._dfield._clone_id += 1
tstate = first_not_None(tstate, self.topology_state)
pretty_name = first_not_None(pretty_name, name, default_pname)
var_name = first_not_None(var_name, name, default_vname)
latex_name = first_not_None(latex_name, name, default_lname)
name = first_not_None(name, default_name)
field = self._dfield._field
field = field.field_like(name=field.name + f"__{self._dfield._clone_id}")
topology = self._dfield._topology.topology_like()
dfield = CartesianDiscreteScalarField(
name=name,
pretty_name=pretty_name,
latex_name=latex_name,
var_name=var_name,
field=field,
topology=topology,
init_topology_state=tstate,
register_discrete_field=True,
)
dfield.copy(self)
return dfield
def tmp_dfield_like(
self,
name,
pretty_name=None,
var_name=None,
latex_name=None,
backend=None,
is_read_only=None,
initial_values=None,
dtype=None,
grid_resolution=None,
ghosts=None,
tstate=None,
lboundaries=None,
rboundaries=None,
register_discrete_field=False,
**kwds,
):
r"""
        Create a new Field and a new temporary CartesianDiscreteScalarField
        like the current object, possibly on a different backend.
/!\ The returned discrete field is not allocated.
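
        A minimal sketch of the intended workflow (names are illustrative;
        see TmpCartesianDiscreteScalarField.honor_memory_request)::

            (tmp, request, request_id) = dfield.tmp_dfield_like(name="tmp")
            # aggregate `request` into an operator work pool, then later:
            # tmp.honor_memory_request(work)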
"""
assert "global_resolution" not in kwds, "Specify grid_resolution instead."
tstate = first_not_None(tstate, self._topology_state)
if is_read_only is not None:
tstate._is_read_only = is_read_only
bfield = self._dfield._field
btopo = self._dfield._topology
field = bfield.field_like(
name=name,
pretty_name=pretty_name,
latex_name=latex_name,
var_name=var_name,
initial_values=initial_values,
dtype=dtype,
lboundaries=lboundaries,
rboundaries=rboundaries,
nb_components=kwds.pop("nb_components", None),
)
topology = btopo.topology_like(
backend=backend,
grid_resolution=grid_resolution,
ghosts=ghosts,
lboundaries=lboundaries,
rboundaries=rboundaries,
)
dfield = TmpCartesianDiscreteScalarField(
field=field, topology=topology, init_topology_state=tstate, **kwds
)
request = dfield.memory_request
request_id = dfield.memory_request_id
return (dfield, request, request_id)
def has_ghosts(self):
"""Return True if this discrete field requires ghost exchanges."""
return self._dfield._has_ghosts
def copy(self, from_dfield, compute_slices=False, **kwds):
"""
Initialize a field on topo with values from another field.
Parameters
-----------
from_dfield : :class:`fields.discrete_field.CartesianDiscreteScalarFieldView` or Array or buffer-like
Source data that is copied to self.
compute_slices: bool, optional, defaults to False
If set to True, apply compute slice to self and from_dfield (if from_dfield is a
CartesianDiscreteScalarFieldView), prior to copy.
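
        Examples
        --------
        A minimal sketch, assuming two compatible discrete fields
        (illustrative only)::

            dst_dfield.copy(src_dfield, compute_slices=True)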
"""
if isinstance(from_dfield, CartesianDiscreteScalarFieldView):
src = from_dfield.sdata
if compute_slices is True:
src = src[from_dfield.compute_slices]
else:
src = from_dfield
dst = self.sdata
if compute_slices is True:
dst = dst[self.compute_slices]
self.backend.memcpy(dst=dst, src=src, **kwds)
return self
def fill(self, value, **kwds):
"""Set data to specified value."""
self.sdata.fill(value=value, **kwds)
return self
def randomize(self, **kwds):
"""Initialize a the with random values."""
for d in range(self.nb_components):
self.backend.rand(out=self.data[d], **kwds)
return self
def integrate(self, scale=True, data=None, components=None):
"""Sum all the values in the mesh."""
result = npw.zeros((1,), dtype=np.float64)
gresult = npw.zeros((1,), dtype=np.float64)
if data is None:
data = self.sdata
view = self.compute_slices
else:
view = Ellipsis
result[0] = data.get()[view].sum()
self.topology.cart_comm.Allreduce(result, gresult, op=MPI.SUM)
if scale:
gresult[0] *= np.prod(self.space_step, dtype=np.float64)
return gresult
def norm(self, p=2, normalize=True, data=None):
"""
Compute a Lp norm of the discrete field.
        Lp-norm = sum(|data|**p)**(1.0/p),
        summed over all grid points, excluding ghosts.
        Parameters
----------
p: int or float, optional
Parameter for the Lp-norm.
p can be any integer (or float) greater than 0.
Infinite norm can be returned by setting p to np.inf.
Defaults to the Euclidean norm (p=2).
normalize: bool, optional
If set to True, divide the result by the number of
compute mesh points. Not used when p=np.inf.
data: tuple of Array, optional
Custom data to apply the norm to.
By default this is the local to field data.
"""
check_instance(p, (int, float))
check_instance(normalize, bool)
result = npw.zeros((1,), dtype=self.dtype)
gresult = npw.zeros((1,), dtype=self.dtype)
if data is None:
data = self.data
view = self.compute_slices
else:
view = Ellipsis
if p == np.inf:
result[0] = (abs(data[view])).max()
self.topology.cart_comm.Allreduce(result, gresult, op=MPI.MAX)
norm = gresult
else:
tmp = abs(data[view]) ** p
result[0] = tmp.sum().get()
self.topology.cart_comm.Allreduce(result, gresult, op=MPI.SUM)
norm = gresult ** (1.0 / p)
if normalize:
norm /= self.grid_npoints
return norm
def distance(self, other, **kwds):
"""
        Compute the distance between two discrete fields:
        p-distance = sum(|data|**p)**(1.0/p),
        where data = |self.data - other.data|,
        summed over all grid points, excluding ghosts.
        See CartesianDiscreteScalarField.norm() for keyword parameters.
"""
check_instance(other, CartesianDiscreteScalarField)
assert "data" not in kwds
assert all(self.compute_resolution == other.compute_resolution)
sview = self.compute_slices
oview = other.compute_slices
data = self.sdata.get()[sview] - other.sdata.get()[oview]
return self.norm(data=data, **kwds)
def accumulate_ghosts(self, **kwds):
"""
Exchange ghosts using cached ghost exchangers which are built at first use.
        ie. exchange every ghost component of self.data using the current topology state.
Specialization for ghost summation.
Defaults to ghost accumulation excluding diagonals.
"""
return self.exchange_ghosts(
ghost_op=GhostOperation.ACCUMULATE, ghost_mask=GhostMask.CROSS, **kwds
)
def exchange_ghosts(
self,
components=None,
directions=None,
ghosts=None,
ghost_op=None,
ghost_mask=None,
exchange_method=None,
evt=None,
build_exchanger=False,
build_launcher=False,
**kwds,
):
"""
Exchange ghosts using cached ghost exchangers which are built at first use.
        ie. exchange every ghost component of self.data using the current topology state.
        Defaults to a full ghost exchange, including diagonals (ie. overwrite operation).
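
        A minimal sketch (waiting is only needed when an event is returned,
        e.g. on asynchronous backends)::

            evt = dfield.exchange_ghosts()
            if evt is not None:
                evt.wait()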
"""
assert "data" not in kwds
msg = "Passing ghosts as an integer is not supported anymore, use a tuple of size dim instead."
if isinstance(ghosts, int):
raise RuntimeError(msg)
directions = to_tuple(first_not_None(directions, range(self.dim)), cast=int)
components = to_tuple(
first_not_None(components, range(self.nb_components)), cast=int
)
ghosts = to_tuple(first_not_None(ghosts, self.ghosts), cast=int)
ghost_op = first_not_None(ghost_op, GhostOperation.EXCHANGE)
ghost_mask = first_not_None(ghost_mask, GhostMask.FULL)
exchange_method = first_not_None(exchange_method, ExchangeMethod.ISEND_IRECV)
assert len(ghosts) == self.dim, msg
assert all(g <= mg for (g, mg) in zip(ghosts, self.ghosts))
assert len(directions) == len(set(directions))
assert 0 < len(directions) <= self.dim
if any(ghosts[i] > 0 for i in directions):
topology_state = self.topology_state
key = (
topology_state,
ghosts,
ghost_op,
ghost_mask,
exchange_method,
components,
directions,
)
if key not in self._dfield._ghost_exchangers:
self._dfield._ghost_exchangers[key] = self.build_ghost_exchanger(
ghosts=ghosts,
ghost_op=ghost_op,
ghost_mask=ghost_mask,
components=components,
directions=directions,
exchange_method=exchange_method,
)
ghost_exchanger = self._dfield._ghost_exchangers[key]
if build_exchanger:
assert evt is None
assert not build_launcher
return ghost_exchanger
if build_launcher:
assert evt is None
return ghost_exchanger._build_launcher()
else:
new_evt = ghost_exchanger(**kwds)
else:
if build_launcher:
assert evt is None
return None
else:
new_evt = None
return first_not_None(new_evt, evt)
def build_ghost_exchanger(
self,
name=None,
components=None,
directions=None,
data=None,
ghosts=None,
ghost_op=None,
ghost_mask=None,
exchange_method=None,
):
"""
Build a ghost exchanger for cartesian discrete fields, possibly on different data.
"""
msg = "Passing ghosts as an integer is not supported anymore, use a tuple of size dim instead."
if isinstance(ghosts, int):
raise RuntimeError(msg)
ghost_op = first_not_None(ghost_op, GhostOperation.EXCHANGE)
ghost_mask = first_not_None(ghost_mask, GhostMask.FULL)
exchange_method = first_not_None(exchange_method, ExchangeMethod.ISEND_IRECV)
check_instance(ghost_op, GhostOperation)
check_instance(exchange_method, ExchangeMethod)
name = first_not_None(
name,
"{}_{}_{}_{}".format(
"_".join(self.name.split("_")[:-1]),
":".join([str(_) for _ in self.topology.proc_shape]),
ghosts,
ghost_op,
),
)
data = to_tuple(first_not_None(data, self.data))
directions = to_tuple(first_not_None(directions, range(self.dim)))
ghosts = first_not_None(ghosts, self.ghosts)
assert len(ghosts) == self.dim, msg
assert all(g <= mg for (g, mg) in zip(ghosts, self.ghosts))
assert len(directions) == len(set(directions))
assert 0 < len(directions) <= self.dim
if all(ghosts[i] == 0 for i in directions):
return None
if not data:
return None
d0 = data[0]
if isinstance(d0, (np.ndarray, HostArray)):
kind = Backend.HOST
elif __HAS_OPENCL_BACKEND__ and isinstance(d0, (clArray.Array, OpenClArray)):
kind = Backend.OPENCL
else:
kind = None
from hysop.fields.ghost_exchangers import CartesianDiscreteFieldGhostExchanger
return CartesianDiscreteFieldGhostExchanger(
name=name,
global_lboundaries_config=self.global_lboundaries_config,
global_rboundaries_config=self.global_rboundaries_config,
topology=self.topology,
data=data,
kind=kind,
ghosts=ghosts,
directions=directions,
ghost_op=ghost_op,
ghost_mask=ghost_mask,
exchange_method=exchange_method,
)
def view(self, topology_state):
"""
Return a view on this CartesianDiscreteScalarField using given CartesianTopologyState.
"""
check_instance(topology_state, CartesianTopologyState)
return CartesianDiscreteScalarFieldView(
dfield=self._dfield, topology_state=topology_state
)
def as_any_dfield(self, memory_order, **kwds):
"""
        Quickly take a view on this DiscreteScalarFieldView using its own
        topology state, supplemented with the given MemoryOrdering.
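
        A minimal sketch, assuming MemoryOrdering is imported from
        hysop.constants::

            f_dfield = dfield.as_any_dfield(MemoryOrdering.F_CONTIGUOUS)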
"""
state = self.topology_state
if state.memory_order is memory_order:
return self
else:
state = state.copy()
state.memory_order = memory_order
return self.view(state)
size = property(_get_size)
shape = property(_get_shape)
compute_resolution = property(_get_compute_resolution)
resolution = property(_get_resolution)
npoints = property(_get_npoints)
ghosts = property(_get_ghosts)
space_step = property(_get_space_step)
is_tmp = property(_get_is_tmp)
mem_tag = property(_get_mem_tag)
coords = property(_get_coords)
mesh_coords = property(_get_mesh_coords)
compute_coords = property(_get_compute_coords)
compute_mesh_coords = property(_get_compute_mesh_coords)
local_boundaries = property(_get_local_boundaries)
local_lboundaries = property(_get_local_lboundaries)
local_rboundaries = property(_get_local_rboundaries)
global_boundaries = property(_get_global_boundaries)
global_lboundaries = property(_get_global_lboundaries)
global_rboundaries = property(_get_global_rboundaries)
global_boundaries_config = property(_get_global_boundaries_config)
global_lboundaries_config = property(_get_global_lboundaries_config)
global_rboundaries_config = property(_get_global_rboundaries_config)
is_at_boundary = property(_get_is_at_boundary)
is_at_left_boundary = property(_get_is_at_left_boundary)
is_at_right_boundary = property(_get_is_at_right_boundary)
periodicity = property(_get_periodicity)
compute_slices = property(_get_compute_slices)
inner_ghost_slices = property(get_inner_ghost_slices)
outer_ghost_slices = property(get_outer_ghost_slices)
all_inner_ghost_slices = property(get_all_inner_ghost_slices)
all_outer_ghost_slices = property(get_all_outer_ghost_slices)
all_inner_ghost_slices_per_ncenters = property(
get_all_inner_ghost_slices_per_ncenters
)
all_outer_ghost_slices_per_ncenters = property(
get_all_outer_ghost_slices_per_ncenters
)
grid_npoints = property(_get_grid_npoints)
axes = property(_get_axes)
tstate = property(_get_tstate)
memory_order = property(_get_memory_order)
data = property(_get_data, _set_data)
buffers = property(_get_buffers)
sdata = property(_get_sdata, _set_sdata)
sbuffer = property(_get_sbuffer)
class CartesianDiscreteScalarField(
CartesianDiscreteScalarFieldView, DiscreteScalarField
):
"""
    Discrete representation of cartesian scalar or vector fields,
    handling distributed (MPI) data (hysop.core.arrays.array.Array),
    which are numpy-like multidimensional arrays defined on various
    array backends (numpy, OpenCL, ...).
    A CartesianDiscreteScalarField is a Field discretized on a Box (a
    regular multi-dimensional grid) through a CartesianTopology.
"""
@debug
def __new__(
cls, field, topology, init_topology_state=None, allocate_data=True, **kwds
):
"""
Create and initialize a CartesianDiscreteScalarField from a
Field and a CartesianTopology.
Parameters
----------
field: :class:`~hysop.field.continuous_field.Field`
            The continuous field that is discretized.
topology: :class:`~hysop.topology.cartesian.CartesianTopology`
The topology where to allocate the discrete field.
        init_topology_state: :class:`hysop.topology.cartesian_topology.CartesianTopologyState`
The init topology state (transposition state for field initialization, optional).
kwds: dict
Base class arguments.
Attributes
----------
resolution: tuple
The resolution of this field, including ghosts.
compute_resolution: tuple
The resolution of this field, excluding ghosts.
ghosts: tuple
The number of ghosts contained in this field.
shape: tuple
Alias for compute_resolution.
data: tuple of :class:`hysop.core.arrays.array.Array`
Actual n-dimensional arrays of data (immutable), one per component.
buffers: tuple of buffers, numpy.ndarray or pyopencl.buffers
Return Array's data buffers.
            Buffers are the lower-level representation of data without any
            hysop wrapping, useful for use with external libraries.
            May return the same as data().
Notes
-----
To modify data inplace use field.data[component_id][...] = ...
DiscreteScalarField.data = (arr0,arr1,arr2,...), DiscreteScalarField.data[0] = arr0 and
        DiscreteScalarField._set_data(...) are equivalent and will perform a full
        copy of the arrays.
        There is currently no way to swap data between discrete fields.
If topology state is read-only, data view may be a constant view
depending on the backend view capabilities (this is the default
state for operator input variables).
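
        A minimal sketch of in-place modification versus full copy
        (``some_array`` is a placeholder for a compatible array)::

            dfield.sdata[...] = 0.0      # in-place fill of the data view
            dfield.data = (some_array,)  # full copy into the field data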
"""
msg = "Multi-component fields have been deprecated (see DiscreteTensorField)."
assert field.nb_components == 1, msg
init_state = init_topology_state or CartesianTopologyState(
field.dim, topology.mpi_params.task_id
)
obj = super().__new__(
cls,
field=field,
topology=topology,
topology_state=init_state,
dfield=None,
**kwds,
)
obj._data = None
obj._has_ghosts = (topology.mesh.ghosts > 0).any()
if allocate_data:
msg = "\nAllocation of {} {} on {}"
msg += "\n (compute_res={}, ghosts={}, dtype={}, size={})"
msg = msg.format(
obj.full_pretty_tag,
obj.pretty_name,
obj.topology.topology.full_pretty_tag,
obj.compute_resolution,
obj.ghosts,
obj.dtype,
bytes2str(
npw.prod(obj.resolution, dtype=npw.int64) * obj.dtype.itemsize
),
)
vprint(msg)
data = obj.backend.empty(shape=obj.resolution, dtype=obj.dtype)
obj._handle_data(data)
else:
from hysop.core.memory.memory_request import MemoryRequest
memory_request = MemoryRequest(
backend=obj.backend, dtype=obj.dtype, shape=obj.resolution
)
obj._memory_request = memory_request
obj._memory_request_id = obj.name
obj._mem_tag = field.mem_tag
return obj
@debug
def __init__(
self, field, topology, init_topology_state=None, allocate_data=True, **kwds
):
super().__init__(
field=field, topology=topology, topology_state=None, dfield=None, **kwds
)
def _handle_data(self, data):
assert self._data is None
from hysop.core.arrays.array import Array
if isinstance(data, tuple):
assert len(data) == 1
data = data[0]
check_instance(data, Array)
# initial value on the compute domain and on ghosts:
(vd, vg) = self.initial_values
if vd is not None:
data[self.mesh.local_compute_slices] = vd
if vg is not None:
for ls, rs, shape in self.mesh.local_outer_ghost_slices:
if shape is not None:
data[ls] = vg
data[rs] = vg
if self.topology_state.is_read_only:
npw.set_readonly(data)
# Store read-only views that do not own memory to
# enforce the read-only initial parameter when specified.
self._memory_request = None
self._memory_request_id = None
self._data = data.ravel()
self._compute_data_view()
@property
def is_tmp(self):
return False
@property
def mem_tag(self):
return self._field.mem_tag
def __eq__(self, other):
return id(self) == id(other)
def __hash__(self):
return id(self)
class TmpCartesianDiscreteScalarField(CartesianDiscreteScalarField):
@debug
def __new__(cls, **kwds):
obj = super().__new__(
cls, allocate_data=False, register_discrete_field=True, **kwds
)
return obj
@debug
def __init__(self, **kwds):
super().__init__(allocate_data=False, register_discrete_field=True, **kwds)
def honor_memory_request(self, work, op=None):
from hysop.core.memory.memory_request import MultipleOperatorMemoryRequests
op = first_not_None(op, self.name)
if isinstance(work, MultipleOperatorMemoryRequests):
(data,) = work.get_buffer(op, self._memory_request_id)
else:
data = work
self._handle_data(data)
@property
def is_tmp(self):
return True
class CartesianDiscreteTensorField(
CartesianDiscreteScalarFieldViewContainerI, DiscreteTensorField
):
pass
CartesianDiscreteField = (CartesianDiscreteScalarField, CartesianDiscreteTensorField)
"""A CartesianDiscreteField is either of CartesianDiscreteScalarField or a CartesianDiscreteTensorField"""